u64 core_asid_generation;
u32 next_asid;
u32 max_asid;
- u32 erratum170;
+ u32 erratum170:1;
};
static DEFINE_PER_CPU(struct svm_asid_data, svm_asid_data);
}
/*
- * Increase the Generation to make free ASIDs. Flush physical TLB and give
- * ASID.
+ * Increase the Generation to make free ASIDs, and indirectly cause a
+ * TLB flush of all ASIDs on the next vmrun.
*/
-static void svm_asid_handle_inc_generation(struct vcpu *v)
+void svm_asid_inc_generation(void)
{
struct svm_asid_data *data = svm_asid_core_data();
- if ( likely(data->core_asid_generation < SVM_ASID_LAST_GENERATION) )
+ if ( likely(data->core_asid_generation < SVM_ASID_LAST_GENERATION) )
{
- /* Handle ASID overflow. */
+ /* Move to the next generation. We can't flush the TLB now
+ * because you need to vmrun to do that, and current might not
+ * be a HVM vcpu, but the first HVM vcpu that runs after this
+ * will pick up ASID 1 and flush the TLBs. */
data->core_asid_generation++;
- data->next_asid = SVM_ASID_FIRST_GUEST_ASID + 1;
-
- /* Handle VCPU. */
- v->arch.hvm_svm.vmcb->guest_asid = SVM_ASID_FIRST_GUEST_ASID;
- v->arch.hvm_svm.asid_generation = data->core_asid_generation;
-
- /* Trigger flush of physical TLB. */
- v->arch.hvm_svm.vmcb->tlb_control = 1;
+ data->next_asid = SVM_ASID_FIRST_GUEST_ASID;
return;
}
* this core (flushing TLB always). So correctness is established; it
* only runs a bit slower.
*/
- printk("AMD SVM: ASID generation overrun. Disabling ASIDs.\n");
- data->erratum170 = 1;
- data->core_asid_generation = SVM_ASID_INVALID_GENERATION;
-
- svm_asid_init_vcpu(v);
+ if ( !data->erratum170 )
+ {
+ printk("AMD SVM: ASID generation overrun. Disabling ASIDs.\n");
+ data->erratum170 = 1;
+ data->core_asid_generation = SVM_ASID_INVALID_GENERATION;
+ }
}
/*
return;
}
- /* Different ASID generations trigger fetching of a fresh ASID. */
- if ( likely(data->next_asid <= data->max_asid) )
- {
- /* There is a free ASID. */
- v->arch.hvm_svm.vmcb->guest_asid = data->next_asid++;
- v->arch.hvm_svm.asid_generation = data->core_asid_generation;
- v->arch.hvm_svm.vmcb->tlb_control = 0;
- return;
- }
+ /* If there are no free ASIDs, need to go to a new generation */
+ if ( unlikely(data->next_asid > data->max_asid) )
+ svm_asid_inc_generation();
+
+ /* Now guaranteed to be a free ASID. */
+ v->arch.hvm_svm.vmcb->guest_asid = data->next_asid++;
+ v->arch.hvm_svm.asid_generation = data->core_asid_generation;
- /* Slow path, may cause TLB flush. */
- svm_asid_handle_inc_generation(v);
+ /* When we assign ASID 1, flush all TLB entries. We need to do it
+ * here because svm_asid_inc_generation() can be called at any time,
+ * but the TLB flush can only happen on vmrun. */
+ if ( v->arch.hvm_svm.vmcb->guest_asid == SVM_ASID_FIRST_GUEST_ASID )
+ v->arch.hvm_svm.vmcb->tlb_control = 1;
+ else
+ v->arch.hvm_svm.vmcb->tlb_control = 0;
}
void svm_asid_inv_asid(struct vcpu *v)
v->arch.hvm_svm.vmcb->cr3 = v->arch.hvm_vcpu.hw_cr3;
}
+static void svm_flush_guest_tlbs(void)
+{
+ /* Roll over the CPU's ASID generation, so it gets a clean TLB when we
+ * next VMRUN. (If ASIDs are disabled, the whole TLB is flushed on
+ * VMRUN anyway). */
+ svm_asid_inc_generation();
+}
+
static void svm_update_vtpr(struct vcpu *v, unsigned long value)
{
struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
.get_segment_register = svm_get_segment_register,
.update_host_cr3 = svm_update_host_cr3,
.update_guest_cr3 = svm_update_guest_cr3,
+ .flush_guest_tlbs = svm_flush_guest_tlbs,
.update_vtpr = svm_update_vtpr,
.stts = svm_stts,
.set_tsc_offset = svm_set_tsc_offset,
vmx_vmcs_exit(v);
}
+static void vmx_flush_guest_tlbs(void)
+{
+ /* No tagged TLB support on VMX yet. The fact that we're in Xen
+ * at all means any guest will have a clean TLB when it's next run,
+ * because VMRESUME will flush it for us. */
+}
static void vmx_inject_exception(
unsigned int trapnr, int errcode, unsigned long cr2)
.get_segment_register = vmx_get_segment_register,
.update_host_cr3 = vmx_update_host_cr3,
.update_guest_cr3 = vmx_update_guest_cr3,
+ .flush_guest_tlbs = vmx_flush_guest_tlbs,
.update_vtpr = vmx_update_vtpr,
.stts = vmx_stts,
.set_tsc_offset = vmx_set_tsc_offset,
*/
void (*update_guest_cr3)(struct vcpu *v);
+ /*
+ * Called to ensure that all guest-specific mappings in a tagged TLB
+ * are flushed; does *not* flush Xen's TLB entries, and on
+ * processors without a tagged TLB it will be a noop.
+ */
+ void (*flush_guest_tlbs)(void);
+
/*
* Reflect the virtual APIC's value in the guest's V_TPR register
*/
};
extern struct hvm_function_table hvm_funcs;
+extern int hvm_enabled;
int hvm_domain_initialise(struct domain *d);
void hvm_domain_relinquish_resources(struct domain *d);
void hvm_update_guest_cr3(struct vcpu *v, unsigned long guest_cr3);
+static inline void
+hvm_flush_guest_tlbs(void)
+{
+ if ( hvm_enabled )
+ hvm_funcs.flush_guest_tlbs();
+}
+
void hvm_hypercall_page_initialise(struct domain *d,
void *hypercall_page);